Load libraries
import os
import cv2
import glob
import numpy as np
import pandas as pd
from keras.models import *
from keras.optimizers import *
from keras.layers import *
from keras.applications import *
from keras.preprocessing.image import *
# Root directory of the distracted-driver dataset.
dir = "/ext/Data/distracted_driver_detection/"  # NOTE(review): shadows the builtin `dir`
# (height, width) input resolution fed to the network.
model_image_size = (320, 480)
# Layer index below which InceptionV3 weights are frozen during fine-tuning.
fine_tune_layer = 172
# Index of the final Dense layer — its weights drive the CAM visualization.
final_layer = 314
# Index of the conv feature-map layer whose output is visualized via CAM.
visual_layer = 311
batch_size = 128
Load the training and validation data generators
# Training generator: light geometric augmentation on top of normalization.
# NOTE(review): featurewise_std_normalization=True requires calling .fit() on
# representative sample data first; without it Keras emits a warning and
# applies no normalization — confirm whether this is intended.
train_gen = ImageDataGenerator(
    featurewise_std_normalization=True,
    samplewise_std_normalization=False,
    rotation_range=10.,
    width_shift_range=0.05,
    height_shift_range=0.05,
    shear_range=0.1,
    zoom_range=0.1,
)
# Validation generator: no augmentation, normalization settings only.
gen = ImageDataGenerator(
    featurewise_std_normalization=True,
    samplewise_std_normalization=False,
)
train_generator = train_gen.flow_from_directory(
    os.path.join(dir, 'train'), model_image_size, shuffle=True,
    batch_size=batch_size, class_mode="categorical")
# BUG FIX: log message typo "subdior" -> "subdir".
print("subdir to train type {}".format(train_generator.class_indices))
valid_generator = gen.flow_from_directory(
    os.path.join(dir, 'valid'), model_image_size, shuffle=True,
    batch_size=batch_size, class_mode="categorical")
print("subdir to valid type {}".format(valid_generator.class_indices))
# Build the classifier: ImageNet-pretrained InceptionV3 backbone with a
# fresh 10-way softmax head (global average pool -> dropout -> dense).
input_tensor = Input((*model_image_size, 3))
preprocessed = Lambda(inception_v3.preprocess_input)(input_tensor)
base_model = InceptionV3(input_tensor=preprocessed,
                         weights='imagenet',
                         include_top=False)
head = GlobalAveragePooling2D()(base_model.output)
head = Dropout(0.5)(head)
head = Dense(10, activation='softmax')(head)
model = Model(base_model.input, head)
print("total layer count {}".format(len(base_model.layers)))

# Freeze every layer below the fine-tune boundary so only the upper layers
# and the new classification head receive gradient updates.
for i in range(fine_tune_layer):
    model.layers[i].trainable = False

print("train_generator.samples = {}".format(train_generator.samples))
print("valid_generator.samples = {}".format(valid_generator.samples))

# BUG FIX: use the batch_size constant instead of a hard-coded 128, and
# ceiling-divide so no extra empty step is scheduled when the sample count
# divides evenly by the batch size (the old `// 128 + 1` overcounted then).
steps_train_sample = (train_generator.samples + batch_size - 1) // batch_size
steps_valid_sample = (valid_generator.samples + batch_size - 1) // batch_size

# Stage 1: warm up the unfrozen layers with Adam.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_generator, steps_per_epoch=steps_train_sample, epochs=4,
                    validation_data=valid_generator, validation_steps=steps_valid_sample)
model.save("models/inceptionV3-imagenet-finetune{}-adam.h5".format(fine_tune_layer))
print("model saved!")

# Stage 2: fine-tune with a low-learning-rate RMSprop pass (1e-5).
model.compile(optimizer=RMSprop(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_generator, steps_per_epoch=steps_train_sample, epochs=6,
                    validation_data=valid_generator, validation_steps=steps_valid_sample)
model.save("models/inceptionV3-imagenet-finetune{}.h5".format(fine_tune_layer))
print("model saved!")
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.models import *

# Reload the fine-tuned model from disk for visualization/inference.
model = load_model("models/inceptionV3-imagenet-finetune{}.h5".format(fine_tune_layer))
# BUG FIX: log message typo "load successed" -> "load succeeded".
print("load succeeded")
#SVG(model_to_dot(model).create(prog='dot', format='svg'))
Class Activation Mapping (CAM) reference: http://cnnlocalization.csail.mit.edu/

The heatmap is computed as $cam = (P - 0.5) \cdot w \cdot \mathrm{output}$, where $P$ is the predicted class probability, $w$ the final Dense-layer weights, and $\mathrm{output}$ the last conv feature map.
# Print "layer_name - index" for every layer so the fine-tune / CAM layer
# indices can be checked by eye.
z = zip([layer.name for layer in model.layers], range(len(model.layers)))
for layer_name, layer_index in z:
    print("{} - {}".format(layer_name, layer_index))
import matplotlib.pyplot as plt
import random
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
def show_heatmap_image(model_show, weights_show):
    """Plot CAM heatmaps over 10 sampled test images in a 5x2 grid.

    Args:
        model_show: model returning (conv_feature_map, class_probabilities)
            for a single input image.
        weights_show: final Dense-layer weight matrix — presumably
            (channels, num_classes); verify against the caller.
    """
    test_dir = os.path.join(dir, "test", "test")
    image_files = glob.glob(os.path.join(test_dir, "*"))
    print(len(image_files))

    # Class labels for c0..c9; loop-invariant, so hoisted out of the loop.
    status = ["safe driving", " texting - right", "phone - right",
              "texting - left", "phone - left", "operation radio",
              "drinking", "reaching behind", "hair and makeup", "talking"]

    plt.figure(figsize=(12, 24))
    for i in range(10):
        plt.subplot(5, 2, i + 1)
        img = cv2.imread(image_files[2000 * i + 113])
        # cv2.resize expects (width, height); model_image_size is (height, width).
        img = cv2.resize(img, (model_image_size[1], model_image_size[0]))
        x = img.copy()
        # BUG FIX: astype() returns a new array; the original discarded the
        # result, so the model received uint8 data instead of float32.
        x = x.astype(np.float32)
        out, predictions = model_show.predict(np.expand_dims(x, axis=0))
        predictions = predictions[0]
        out = out[0]
        max_idx = np.argmax(predictions)
        prediction = predictions[max_idx]
        plt.title('c%d |%s| %.2f%%' % (max_idx, status[max_idx], prediction * 100))
        # CAM: weight the conv feature maps by the classifier weights, keep
        # the channel for the predicted class.
        cam = (prediction - 0.5) * np.matmul(out, weights_show)
        cam = cam[:, :, max_idx]
        # Normalize to [0, 1], then shift/rescale so the bottom 20% clips.
        cam -= cam.min()
        cam /= cam.max()  # NOTE(review): NaN if cam is constant — assumes a non-flat map
        cam -= 0.2
        cam /= 0.8
        cam = cv2.resize(cam, (model_image_size[1], model_image_size[0]))
        heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
        heatmap[np.where(cam <= 0.2)] = 0  # suppress weak activations
        out = cv2.addWeighted(img, 0.8, heatmap, 0.4, 0)
        plt.axis('off')
        plt.imshow(out[:, :, ::-1])  # BGR -> RGB for matplotlib
    print("done")
print("done")
# Weights of the final Dense layer — presumably (channels, num_classes);
# the shape is printed below for verification.
weights = model.layers[final_layer].get_weights()[0]
# Symbolic output of the conv feature-map layer used for CAM.
layer_output = model.layers[visual_layer].output
# model2 returns both the feature map and the class probabilities in one pass.
model2 = Model(model.input, [layer_output, model.output])
print("layer_output {0}".format(layer_output))
print("weights shape {0}".format(weights.shape))
show_heatmap_image(model2, weights)
def gen_kaggle_csv(model, model_image_size, csv_name):
    """Predict over the Kaggle test set and write a submission CSV.

    Args:
        model: trained classifier producing 10 class probabilities per image.
        model_image_size: (height, width) the test images are resized to.
        csv_name: output path for the submission file.
    """
    # Renamed from `dir` (local variable shadowed the builtin).
    data_root = "/ext/Data/distracted_driver_detection/"
    gen = ImageDataGenerator()
    test_generator = gen.flow_from_directory(
        os.path.join(data_root, "test"), model_image_size, shuffle=False,
        batch_size=batch_size, class_mode=None)
    steps = test_generator.samples // batch_size + 1
    y_pred = model.predict_generator(test_generator, steps=steps, verbose=1)
    print("y_pred shape {}".format(y_pred.shape))
    # Clip to avoid infinite log-loss penalties on over-confident predictions.
    y_pred = y_pred.clip(min=0.005, max=0.995)
    print(y_pred[:3])
    # One row per image: [filename, c0..c9 probabilities].
    rows = [[os.path.basename(fname), *y_pred[i]]
            for i, fname in enumerate(test_generator.filenames)]
    arr = np.array(rows)
    data = {'img': arr[:, 0]}
    for i in range(10):
        data["c%d" % i] = arr[:, i + 1]
    df = pd.DataFrame(data, columns=['img'] + ['c%d' % i for i in range(10)])
    # BUG FIX: removed no-op `df.head(10)` (its return value was discarded).
    df = df.sort_values(by='img')
    df.to_csv(csv_name, index=None, float_format='%.3f')
    print("csv saved")
    print("done")
# Generate the Kaggle submission CSV with the fine-tuned model.
gen_kaggle_csv(model, model_image_size, 'csv/InceptionV3-imagenet-finetune{}-pred.csv'.format(fine_tune_layer))